## load packages
# library() errors immediately if a package is missing; require() returns
# FALSE and lets the script fail later with a confusing downstream error,
# so it should not be used for mandatory dependencies.
library(foreach)
library(sf)
library(tigris)
library(leaflet)
library(lubridate)
library(remotes)
library(mapboxapi)
library(mapview)
library(raster)
library(tidyverse); theme_set(theme_bw())
library(doParallel)
library(parallel)
library(exactextractr)
library(scales)
library(censusapi)
## import useful functions
# NA-tolerant wrappers around the base aggregators
Sum <- function(x) sum(x, na.rm = TRUE)
Mean <- function(x) mean(x, na.rm = TRUE)
Min <- function(x) min(x, na.rm = TRUE)
Max <- function(x) max(x, na.rm = TRUE)
# robust numeric coercion: paste() first so factors convert via their labels,
# not their underlying integer codes
toNumber <- function(x) as.numeric(paste(x))
# count of missing values in a vector
sum.na <- function(x) sum(is.na(x))
# reproduce ggplot2's default discrete color palette for n levels
# (evenly spaced hues around the HCL color wheel)
ggcolor <- function(n) {
  # use `<-` for assignment and spell out length.out (avoid partial matching)
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues, l = 65, c = 100)[1:n]
}
# fetch the 2018 ACS 5-year variable metadata once, for label lookups later
acs_vars_2018_5yr <- listCensusMetadata(name = "2018/acs/acs5", type = "variables")
# CRS constants: EPSG:26910 (projected, meters) for planar operations such as
# area/centroid/intersection; EPSG:4326 (lat/long) for leaflet display
crs_planar <- 26910
crs_leaflet <- 4326
## bay area geography
# the nine counties touching San Francisco Bay
bay_county_names <- c("Alameda", "Contra Costa", "Marin", "Napa", "San Francisco",
"San Mateo", "Santa Clara", "Solano", "Sonoma")
# cartographic-boundary county outlines for CA, reprojected for leaflet
california <- counties('CA', cb = TRUE, progress_bar = FALSE) %>%
st_transform(crs_leaflet)
bay_counties <- california %>% filter(NAME %in% bay_county_names)
# census block groups (CBGs) for the nine bay counties
bay_cbgs <- block_groups(state = 'CA', county = bay_county_names, class = 'sf', progress_bar = FALSE) %>%
st_transform(crs_leaflet)
# census-designated places, spatially subset (sf's `[` operator) to those
# intersecting the bay CBGs; the subset is done in the planar CRS
bay_places <- places(state = 'CA', class = 'sf', progress_bar = FALSE) %>%
st_transform(crs_planar) %>%
.[bay_cbgs %>% st_transform(crs_planar),] %>%
st_transform(crs_leaflet)
sanrafael <- bay_places %>% filter(NAME == 'San Rafael')
# study-area CBGs: CBGs whose centroid falls inside the San Rafael place
# boundary, plus five hand-picked CBGs added back explicitly at the end
sr_cbgs <- bay_cbgs %>%
st_transform(crs_planar) %>%
st_centroid %>%
st_intersection(sanrafael %>% select(geometry) %>% st_transform(crs_planar)) %>%
st_drop_geometry %>%
right_join(bay_cbgs %>% select(GEOID), ., by = 'GEOID') %>%
rbind(bay_cbgs %>% filter(GEOID %in% c('060411082004', '060411090021', '060411121002',
'060411101002', '060411102002')))
## Warning in st_centroid.sf(.): st_centroid assumes attributes are constant over
## geometries of x
## Warning: attribute variables are assumed to be spatially constant throughout all
## geometries
# Marin County tracts; study-area tracts are those containing a study-area CBG
# (a CBG GEOID is its tract GEOID plus one trailing digit, hence str_sub -2)
marin_tracts <- tracts(state = 'CA', county = 'Marin', progress_bar = FALSE) %>%
st_transform(crs_leaflet)
sr_tracts <- marin_tracts %>% filter(GEOID %in% str_sub(sr_cbgs$GEOID, end = -2))
# map the study area: Marin CBGs (gray; COUNTYFP 041 is Marin), San Rafael
# place boundary (red), and the selected study-area CBGs (blue, GEOID labels)
leaflet() %>%
addMapboxTiles(style_id = "light-v9", username = "mapbox") %>%
addPolygons(data = bay_cbgs %>% filter(COUNTYFP == '041'), color = 'gray', fill = FALSE, weight = 2) %>%
addPolygons(data = sanrafael, color = 'red') %>%
addPolygons(data = sr_cbgs, color = 'blue', weight = 2, label = ~GEOID)
I’ve decided to look at San Rafael in Marin County, as shown in the map above. The outline of the census-designated place is in red, and the CBGs that compose my analysis are shown in blue. San Rafael is the county seat of Marin and is also its largest city, with almost 60,000 residents. Of those, about 14,000 people (almost a quarter of the city’s population) live less than 3 feet above sea level. The city has been identified as one with high climate risk.
## get SLR scenarios
sr_boundary <- sanrafael %>% st_transform(crs_leaflet)
sr_bbox <- sr_boundary %>% st_bbox()
# one-time download of flood-depth rasters from the Point Blue "Our Coast Our
# Future" server, for every sea-level-rise (cm) x return-period (years)
# combination, cropped to the San Rafael bounding box. Zero depths are recoded
# to NA (dry cells) and each raster is cached as a local GeoTIFF, so the loop
# stays commented out after the first run.
# for (slr in c("000", "025", "050", "075", "100")) {
# for (rp in c("001", "020", "100")) {
# print(paste0("SLR", slr, "_RP", rp))
#
# url <- paste0(
# "http://geo.pointblue.org/ocofmap/zip_v2.php?activelayer=Inner+Bay%20",
# "Flood%20Depth%20", slr, "cm%20SLR%20%2B%20Wave%20", rp,
# "&left=", sr_bbox["xmin"], "&right=", sr_bbox["xmax"],
# "&upper=", sr_bbox["ymax"], "&lower=", sr_bbox["ymin"])
# temp <- tempfile()
# download.file(url, destfile = temp, mode = "wb")
# flood <- raster(unzip(temp, exdir = tempfile())[1])
# unlink(temp)
# values(flood)[which(values(flood)==0)] <- NA
# writeRaster(flood, paste0("./SLR", slr, "_RP", rp, "_epa_flood.tif"), overwrite = T)
# }
# }
## plot worst-case scenario flood
# worst case = 100cm SLR + 100-year event; aggregate 5x to speed up rendering
flood <- raster('./SLR100_RP100_epa_flood.tif') %>% raster::aggregate(fact = 5)
# continuous blue palette over the observed depth range; NA (dry) transparent
flood_pal <- colorNumeric(palette = "Blues", domain = values(flood), na.color = "transparent")
leaflet() %>%
addMapboxTiles(style_id = "satellite-streets-v11", username = "mapbox") %>%
addRasterImage(flood, colors = flood_pal, opacity = 0.75) %>%
addPolygons(data = sanrafael, color = 'red', weight = 2, fill = FALSE) %>%
addLegend(pal = flood_pal, values = values(flood), title = "Flood depth, cm")
## Warning in showSRID(uprojargs, format = "PROJ", multiline = "NO", prefer_proj
## = prefer_proj): Discarded ellps WGS 84 in CRS definition: +proj=merc +a=6378137
## +b=6378137 +lat_ts=0 +lon_0=0 +x_0=0 +y_0=0 +k=1 +units=m +nadgrids=@null
## +wktext +no_defs +type=crs
## Warning in showSRID(uprojargs, format = "PROJ", multiline = "NO", prefer_proj =
## prefer_proj): Discarded datum World Geodetic System 1984 in CRS definition
## Warning in showSRID(uprojargs, format = "PROJ", multiline = "NO", prefer_proj
## = prefer_proj): Discarded ellps WGS 84 in CRS definition: +proj=merc +a=6378137
## +b=6378137 +lat_ts=0 +lon_0=0 +x_0=0 +y_0=0 +k=1 +units=m +nadgrids=@null
## +wktext +no_defs +type=crs
## Warning in showSRID(uprojargs, format = "PROJ", multiline = "NO", prefer_proj =
## prefer_proj): Discarded datum World Geodetic System 1984 in CRS definition
Here is a map of the worst-case flooding scenario: 100 year return period event, coupled with 100cm of sea level rise. We can clearly see that some portions of the city, especially in the downtown area, are likely to see at least some level of inundation. However, a lot of the city to the north and west does not see any inundation even in this maximum scenario. Therefore we should expect to see no flood loss to vehicles in these areas.
## get current vehicle counts from ACS
# ACS table B08201 (household size by vehicles available), at tract level.
# Keep estimate columns only (drop margins 'M' and annotation 'A' columns),
# attach human-readable labels from the metadata pulled earlier, and split
# each label into its household-size and vehicle-count components; rows with
# no vehicle component are table totals and are dropped.
sr_vehicles <-
getCensus(name = "acs/acs5", vars = 'group(B08201)', vintage = 2018,
region = 'tract:*', regionin = 'state:06+county:041') %>%
mutate(GEOID = paste0(state, county, tract)) %>%
filter(GEOID %in% sr_tracts$GEOID) %>%
select(-state, -county, -tract, -NAME, -GEO_ID) %>%
select(-ends_with('M'), -ends_with('A')) %>%
pivot_longer(ends_with('E'), names_to = 'variable', values_to = 'estimate') %>%
left_join(acs_vars_2018_5yr %>% select(name, label), by = c('variable' = 'name')) %>%
separate(label, c(NA, NA, 'household', 'vehicles'), sep = '!!', fill = 'right') %>%
filter(!is.na(vehicles))
# study-area-wide distribution of vehicle ownership (share of households
# owning 0, 1, 2, ... vehicles)
sr_vehicles_pct <- sr_vehicles %>%
group_by(vehicles) %>%
summarize(estimate = sum(estimate), .groups = 'drop') %>%
mutate(percentage = estimate/sum(estimate))
# total vehicles in the study area: parse the vehicle count from the first
# character of each label ("4 or more ..." -> 4; "No vehicle available" has no
# leading digit, coerces to NA, and is recoded to 0). This replaces the
# hard-coded c(1,2,3,4,0), which silently depended on the alphabetical row
# order of the labels, and matches how the same parsing is done later on.
total_vehicles <- sr_vehicles %>%
group_by(vehicles) %>%
summarize(estimate = sum(estimate), .groups = 'drop') %>%
mutate(num_vehicles = suppressWarnings(toNumber(str_sub(vehicles, end = 1))) %>%
ifelse(is.na(.), 0, .),
total_vehicles = num_vehicles*estimate) %>%
pull(total_vehicles) %>% sum
## get projected future vehicle counts from EMFAC
# CA Emissions Factor Model export; first 8 lines are header boilerplate
emfac <- read_csv("./EMFAC2017-ER-2011Class-Marin2018-2020-2030-2040-2050-Annual-20201128130701.csv",
col_types = cols(), skip = 8)
# county-wide vehicle population by calendar year; scale_factor is growth
# relative to the first year in the file, used to project future counts
marin_vehicles <- emfac %>%
group_by(year = `Calendar Year`) %>%
summarize(estimate = sum(Population), .groups = 'drop') %>%
mutate(scale_factor = estimate / estimate[1])
Now that we’ve collected hazard data, we can move on to exposure. This analysis focuses specifically on the number of vehicles at risk within the city of San Rafael. Based on data from the 2018 5-year American Community Survey (ACS), there are 43,721 vehicles in the census tracts that define our study area. We’ll reduce this number slightly later on to account for only the vehicles that fall within the San Rafael boundary.
Now that we know the number of vehicles, we need to assign them to households. ACS Table B08201 gives the number of vehicles allocated to each household at the census tract level, broken down by the number of people in each household. We are not going to be using information about the number of residents in each household, so we can aggregate to get to a distribution of vehicle ownership within our study area.
In San Rafael, 7.23% of households do not own a vehicle and 36.97% own a single vehicle. We are not capturing the first group in this analysis even though their mobility patterns are likely the most vulnerable to disruption, but we will do some additional calculations to assess the single-vehicle group later on.
Using data from the CA Emissions Factor Model (EMFAC), we can then estimate how many vehicles will be on the road in Marin County in the next three decades. If we assume that San Rafael represents a constant percentage of all vehicles within Marin, and that the demographics of car ownership stay the same (i.e. the percentages of households with 1 car, 2 cars, etc.) then we can scale the values to estimate the future number of vehicles on the road for our analysis.
## estimate the number of households in San Rafael
# DP04_0001E from the ACS data profile, summed over the tracts that contain
# our study-area CBGs — an independent cross-check against table B08201
# (NOTE(review): DP04_0001 is a housing-unit total; used here as a household
# count — confirm the distinction is acceptable for this comparison)
num_households <-
getCensus(name = 'acs/acs5/profile', vars = 'DP04_0001E', vintage = 2018,
regionin = 'state:06+county:041', region = 'tract:*') %>%
mutate(GEOID = paste0(state, county, tract)) %>%
filter(GEOID %in% str_sub(sr_cbgs$GEOID, end = -2)) %>%
pull(DP04_0001E) %>% sum
## get building footprints from OpenStreetMap
osm_bldg <- st_read('./OSM_norcal/gis_osm_buildings_a_free_1.shp', quiet = TRUE)
# spatial subset: keep only footprints intersecting the study-area tracts
sr_buildings <- osm_bldg[sr_tracts,] %>% st_transform(crs_leaflet)
## although coordinates are longitude/latitude, st_intersects assumes that they are planar
## plot results
# overlay the OSM footprints (red) on the study-area tracts (blue outline)
leaflet() %>%
addMapboxTiles(style_id = "satellite-streets-v11", username = "mapbox") %>%
addPolygons(data = sr_tracts, weight = 1) %>%
addPolygons(data = sr_buildings, color = 'red', fillOpacity = 1, weight = 1)
We have two estimates of the number of households in our study area: 25378 comes from the ACS table on vehicles (B08201), and 26440 comes from the ACS summary statistics (DP04). When we load the OpenStreetMap data, though, we only get 857 buildings, which seems incorrect. When we look at the map, we can see that large swaths of what look like neighborhoods have no buildings identified.
## get building footprints from Microsoft
# AI-generated footprints for Marin County, read as WKT with lat/long CRS
marin_buildings <- st_read('./ca_06041_footprints.csv', crs = crs_leaflet, quiet = TRUE)
# subset to the study area, then clip to tract boundaries (st_intersection
# also attaches each tract's attributes, including its GEOID, to the buildings)
sr_buildings <- marin_buildings %>% st_transform(crs_planar) %>%
.[sr_tracts %>% st_transform(crs_planar),] %>%
st_intersection(sr_tracts %>% st_transform(crs_planar)) %>%
st_transform(crs_leaflet)
## Warning: attribute variables are assumed to be spatially constant throughout all
## geometries
## plot results
leaflet() %>%
addMapboxTiles(style_id = "satellite-streets-v11", username = "mapbox") %>%
addPolygons(data = sr_tracts, weight = 1) %>%
addPolygons(data = sr_buildings, color = 'red', fillOpacity = 1, weight = 1)
Let’s look at the AI-generated Microsoft building footprints dataset instead. Here we find a total of 16777 buildings within the dataset, which is much closer to the number of households that we measured earlier. (Note that more than one household can exist within a single building, as we’ll explore more in a bit.) The map also looks more accurate; buildings are scattered throughout the study area, and the only spaces that are empty are clearly open space. We can now move on to assigning vehicles to buildings.
## determine the number of units within each structure
profile_vars <- listCensusMetadata(name = 'acs/acs5/profile', vintage = 2018)
# full DP04 profile for every Marin tract, reshaped long with parsed labels
marin_households <-
getCensus(name = 'acs/acs5/profile', vars = 'group(DP04)', vintage = 2018,
regionin = 'state:06+county:041', region = 'tract:*') %>%
select(-state, -GEO_ID, -NAME) %>%
select(-ends_with('M'), -ends_with('A'), -ends_with('PE')) %>%
pivot_longer(cols = ends_with('E'), names_to = 'variable', values_to = 'estimate') %>%
left_join(profile_vars %>% select('name', 'label'), by = c('variable' = 'name')) %>%
left_join(sr_tracts %>% select(TRACTCE, GEOID) %>% st_drop_geometry, by = c('tract' = 'TRACTCE')) %>%
separate(label, c(NA, 'group_title', 'group', 'description'), sep = '!!', fill = 'right')
# UNITS IN STRUCTURE rows only: map each bracket to a representative unit
# count (upper end of the bracket; 25 assumed for "20 or more"), then
# estimate/units implies a building count per tract and unit class, and
# building_pct is each class's share of buildings within its tract
sr_households <- marin_households %>%
filter(GEOID %in% sr_tracts$GEOID) %>%
filter(group_title == 'UNITS IN STRUCTURE' & !is.na(description)) %>%
mutate(units = case_when(grepl('1-unit', description) ~ 1,
grepl('2 units', description) ~ 2,
grepl('3 or 4 units', description) ~ 4,
grepl('5 to 9 units', description) ~ 9,
grepl('10 to 19 units', description) ~ 19,
grepl('20 or more units', description) ~ 25)) %>%
filter(!is.na(units)) %>%
group_by(GEOID, units) %>%
summarize(estimate = sum(estimate), .groups = 'drop_last') %>%
mutate(buildings = estimate/units,
building_pct = buildings/sum(buildings))
## 1. allocate households to building footprints
# footprint_area (computed in the planar CRS, so units are meters^2) proxies
# for the number of units in a building; obs.id is a stable row key used for
# all later joins back to geometry
sr_buildings <- sr_buildings %>%
select(-WKT) %>%
mutate(footprint_area = sr_buildings %>% st_transform(crs_planar) %>% st_area %>% toNumber,
obs.id = 1:nrow(.))
# geometry-free lookup table used inside the allocation loop (much faster)
sr_buildings_id <- sr_buildings %>% st_drop_geometry %>% select(GEOID, footprint_area, obs.id)
# unit-count classes, largest first, to pair with buildings sorted by area
unitcounts = c(25, 19, 9, 4, 2, 1)
# per tract: sort buildings by footprint area (largest first), compute
# cumulative breakpoints from the unit-class shares, and duplicate each
# building once per unit it is assumed to contain
sr_buildings_new <-
foreach(ct = unique(sr_tracts$GEOID), .combine = 'rbind') %do% {
sr_buildings_subset <- sr_buildings_id %>% filter(GEOID == ct) %>% arrange(desc(footprint_area))
sr_households_subset <- sr_households %>% filter(GEOID == ct)
# cumulative building counts per unit class (rev() flips units ascending ->
# descending to match unitcounts and the area-descending building order)
housecounts <- c(0, round(cumsum(rev(sr_households_subset$building_pct)) * nrow(sr_buildings_subset)))
foreach(i = seq_along(unitcounts), .combine = 'rbind') %do% {
# guard against empty bins: when rounding makes housecounts[i+1] equal to
# housecounts[i], the original (a+1):a sequence ran BACKWARDS and
# duplicated the wrong rows; skip the bin instead
if (housecounts[i + 1] > housecounts[i]) {
rows <- (housecounts[i] + 1):housecounts[i + 1]
sr_buildings_subset[rep(rows, each = unitcounts[i]), ]
} else {
NULL
}
}
}
## 2. allocate vehicles to households
# shuffle household rows so the vehicle labels below are assigned at random
sr_buildings_new <- sr_buildings_new %>%
.[sample(x = 1:nrow(.), size = nrow(.), replace = FALSE),] %>%
mutate(house.id = 1:nrow(.))
# the 20 (household size x vehicle count) combinations, taken from the first
# tract's B08201 rows. "No vehicle available" has no leading digit, so the
# NA from coercion is deliberate and recoded to 0; suppressWarnings silences
# that known coercion warning (consistent with how it is handled later).
sr_vehicles_label <- sr_vehicles %>%
filter(GEOID == GEOID[1]) %>%
transmute(household = toNumber(str_sub(household, end = 1)),
vehicles = suppressWarnings(toNumber(str_sub(vehicles, end = 1))) %>%
ifelse(is.na(.), 0, .))
# within each tract, replicate each of the household-type labels in
# proportion to its ACS estimate, one label row per household
sr_buildings_label <-
foreach(ct = unique(sr_tracts$GEOID), .combine = 'rbind') %do% {
sr_buildings_subset <- sr_buildings_new %>% filter(GEOID == ct)
sr_vehicles_subset <- sr_vehicles %>% filter(GEOID == ct)
vehiclecounts <- round(sr_vehicles_subset$estimate /
sum(sr_vehicles_subset$estimate) * nrow(sr_buildings_subset))
# seq_along() instead of the hard-coded 1:20, so the loop stays correct if
# the number of label combinations ever changes
foreach(i = seq_along(vehiclecounts), .combine = 'rbind') %do% {
sr_vehicles_label[rep(i, vehiclecounts[i]),]
}
}
# rounding can leave the label table a few rows short of (or over) the
# household count; pad with repeats of the first label, or trim, so the two
# line up exactly (generalizes the original unconditional one-row pad)
n_needed <- nrow(sr_buildings_new)
if (nrow(sr_buildings_label) < n_needed) {
sr_buildings_label <- rbind(sr_buildings_label,
sr_buildings_label[rep(1, n_needed - nrow(sr_buildings_label)),])
} else if (nrow(sr_buildings_label) > n_needed) {
sr_buildings_label <- sr_buildings_label[1:n_needed,]
}
sr_buildings_new <- sr_buildings_new %>%
mutate(household = sr_buildings_label$household,
vehicles = sr_buildings_label$vehicles)
## 3. allocate vehicle types to vehicles
# expand to one record per individual vehicle: each household row is repeated
# once per vehicle it owns. A single vectorized rep() replaces the original
# O(n^2) rbind-inside-a-loop; households with 0 vehicles drop out because
# rep(..., times = 0) contributes no rows.
sr_vehicles_new <- sr_buildings_new[rep(seq_len(nrow(sr_buildings_new)),
times = sr_buildings_new$vehicles), ]
# share of 2018 passenger vehicles that are sedans: select the LDA class
# explicitly rather than taking `.[1]` of the grouped result, which silently
# depended on 'LDA' sorting first alphabetically among the categories
emfac_2018 <- emfac %>% filter(`Calendar Year` == 2018)
pct_sedans <- sum(emfac_2018$Population[emfac_2018$`Vehicle Category` == 'LDA']) /
sum(emfac_2018$Population)
# randomly assign each vehicle a type (sedan vs. pickup) in EMFAC proportions
sr_vehicles_new$type <- sample(c('sedan', 'pickup'), size = nrow(sr_vehicles_new),
replace = TRUE, prob = c(pct_sedans, 1-pct_sedans))
Assigning vehicles is a three-step process. First, we need to break out buildings into their individual households. We can do this with the DP04 group of ACS tables, which measure the number of units in a residential structure. However, we do not know from the Microsoft data which structures are single-family homes, for instance, and which ones are multi-story apartment complexes. Therefore we will assume that there is a linear correlation between the area of a structure’s footprint and the number of units within it. Let’s say for a given census tract that there are 50% single-family units, 25% duplexes, 15% with 3-10 units, and 10% with more than ten units. In this case the top 10% of structures by footprint area would be assigned as more than ten units, the next highest 15% would be assigned 3-10 units, etc. The buildings with multiple units are then duplicated in our dataset so that we can assign a different number of vehicles to each distinct household. After we complete this process, we have 25898 households with location data, which is very close to the two estimates of the number of households we reported above. This is a good “gut check” to confirm that we’re representing exposure well within our study area.
The next step is to allocate vehicles to households. Although we have information about the number of people living within a household, we assume that this is completely uncorrelated with the unit information above. Within each census tract, we have a percentage of households with no vehicles, one vehicle, two vehicles, etc. We randomly assign a designation to each household in the census tract, then if there are multiple vehicles in a household, we duplicate that record again to represent each individual vehicle. At the end of this process we now have 44604 records of vehicles tagged to location data, which is quite close to our initial count of 43721 vehicles within the study area. We have also retained the information about which households only have a single vehicle, which we will use later.
Finally, we need to assign a vehicle type to each vehicle. This is important information for the vulnerability and risk estimation steps of the SURF process, because different vehicles will (a) respond to inundation in different ways and (b) have different estimated replacement values. EMFAC considers four of its vehicle designations as “passenger vehicles.” These designations represent passenger cars (LDA) and three increasingly large types of trucks (LDT1, LDT2, MDV). We also collected vulnerability curves from the USACE EGM09-04 report for five types of vehicles: sedans, pickups, SUVs, sport cars, and minivans. These two classification systems don’t exactly line up, so we’ve simplified the vehicles down to two types. Sedans, or the LDA class, represent 57.68% of cars on the road. All other EMFAC classes will be represented by the Pickups vulnerability curve. The differences between the two vulnerability curves and their 90th percentile confidence intervals are shown below.
## collect vulnerability data on the relationship between flood depth and vehicle damage
depthdamage <- read.csv('./vehicle_depthdamage.csv')
# prepend an all-zero row so each curve starts at (depth 0, damage 0)
depthdamage <- rbind(rep(0, ncol(depthdamage)), depthdamage)
## plot sedan vs. pickup vulnerability curves
# ribbons show mean +/- 1.645 standard deviations (a 90% interval, assuming
# normally distributed damage at each depth)
ggplot(depthdamage) +
geom_line(aes(x = DepthAboveGround, y = Sedans_PctDamage, color = 'Sedans'), size = 1) +
geom_ribbon(aes(x = DepthAboveGround, fill = 'Sedans',
ymin = Sedans_PctDamage - Sedans_StdDev*1.645,
ymax = Sedans_PctDamage + Sedans_StdDev*1.645),
alpha = 0.25, show.legend = FALSE) +
geom_line(aes(x = DepthAboveGround, y = Pickups_PctDamage, color = 'Pickups'), size = 1) +
geom_ribbon(aes(x = DepthAboveGround, fill = 'Pickups',
ymin = Pickups_PctDamage - Pickups_StdDev*1.645,
ymax = Pickups_PctDamage + Pickups_StdDev*1.645),
alpha = 0.25, show.legend = FALSE) +
ggtitle('Vehicle Vulnerability Curves') +
labs(x = 'Feet of Inundation', y = 'Expected Damage Level', color = 'Vehicle Type') +
scale_x_continuous(expand = c(0,0)) +
scale_y_continuous(expand = c(0,0), labels = percent) +
scale_color_manual(values = c('royalblue1', 'darkorange1')) +
scale_fill_manual(values = c('royalblue1', 'darkorange1')) +
theme_classic()
#### find vehicle AAL for 2018 ####
## filter to only buildings with vehicles in our study area
# clip buildings to the San Rafael place boundary, then keep only those that
# were allocated at least one vehicle (note: overwrites sr_buildings)
sr_buildings <- sr_buildings %>%
st_transform(crs_planar) %>%
.[sanrafael %>% st_transform(crs_planar),] %>%
st_transform(crs_leaflet) %>%
filter(obs.id %in% sr_buildings_new[sr_buildings_new$vehicles > 0, 'obs.id'])
# recount vehicles after clipping to the place boundary (overwrites the
# earlier study-area total computed from the ACS table)
total_vehicles <- sr_vehicles_new %>%
right_join(sr_buildings %>% select(obs.id), ., by = 'obs.id') %>%
st_transform(crs_planar) %>%
.[sanrafael %>% st_transform(crs_planar),] %>%
st_transform(crs_leaflet) %>%
nrow
## get flood depths at every location
# BUG FIX: num_cores was never defined anywhere in this script, so
# makeCluster() would fail on a fresh session; default to all-but-one core
# (the exists() guard keeps any externally supplied value working)
if (!exists("num_cores")) num_cores <- max(1L, parallel::detectCores() - 1L)
cl <- makeCluster(num_cores)
registerDoParallel(cl)
# nested foreach over every SLR x return-period raster: extract the mean
# flood depth (cm) over each building footprint; NaN (building outside raster
# coverage) and negative depths are floored to 0
sr_building_depth <-
foreach (slr = c("000", "025", "050", "075", "100"),
.combine = 'rbind', .inorder = FALSE,
.packages = c('dplyr', 'raster', 'exactextractr')) %:%
foreach (rp = c("001", "020", "100"),
.combine = 'rbind', .inorder = FALSE,
.packages = c('dplyr', 'raster', 'exactextractr')) %dopar% {
flood <- raster(paste0("./SLR", slr, "_RP", rp, "_epa_flood.tif"))
data.frame(SLR = slr, RP = rp, obs.id = sr_buildings$obs.id,
depth = exact_extract(x = flood, y = sr_buildings, fun = 'mean', progress = FALSE) %>%
unlist %>% ifelse(is.nan(.) | .<0, 0, .))
}
stopCluster(cl)
## find the percentage of households flooded in each scenario
# compute the flooded share per scenario, then keep the worst-case value
pct_flooded <- sr_building_depth %>%
group_by(SLR, RP) %>%
summarize(percentage_flooded = sum(depth>0)/length(depth), .groups = 'drop') %>%
arrange(desc(percentage_flooded)) %>%
pull(percentage_flooded) %>%
first
## find the total cost of vehicles flooded in each scenario
# join per-building depths onto the per-vehicle records and interpolate the
# depth-damage curves. The curve x-axis is in feet while raster depths are in
# cm, so *12*2.54 converts feet -> cm (12 in/ft x 2.54 cm/in); a 500cm anchor
# point at 100% damage caps the interpolation for extreme depths.
damage <-
foreach (slr = c("000", "025", "050", "075", "100"), .combine = 'rbind') %:%
foreach (rp = c("001", "020", "100"), .combine = 'rbind') %do% {
sr_building_depth %>%
filter(SLR == slr & RP == rp) %>%
select(-SLR, -RP) %>%
right_join(sr_vehicles_new, ., by = 'obs.id') %>%
mutate(sedan_damage =
approx(x = c(depthdamage$DepthAboveGround*12*2.54, 500),
y = c(depthdamage$Sedans_PctDamage, 1), xout = depth)$y,
pickup_damage =
approx(x = c(depthdamage$DepthAboveGround*12*2.54, 500),
y = c(depthdamage$Pickups_PctDamage, 1), xout = depth)$y) %>%
# assumed replacement costs: $20K per sedan, $35K per pickup
mutate(sedan_cost = 20000 * sedan_damage,
pickup_cost = 35000 * pickup_damage) %>%
mutate(cost = case_when(type == 'sedan' ~ sedan_cost, type == 'pickup' ~ pickup_cost)) %>%
select(obs.id, house.id, cost) %>%
cbind(SLR = slr, RP = rp)
}
## convert to an AAL
# average the two lowest SLR scenarios (000/025cm, near-present sea level),
# then integrate losses over return periods. The 0.95/0.04/0.01 weights are
# the annual-exceedance-probability widths between the RP1 (p=1), RP20
# (p=0.05), and RP100 (p=0.01) events (trapezoid rule, with the RP100 loss
# applied to the remaining tail).
AAL <- damage %>%
group_by(RP, SLR) %>%
summarize(cost = sum(cost), .groups = 'drop_last') %>%
filter(SLR %in% c('000', '025')) %>%
summarize(cost = mean(cost), .groups = 'drop') %>%
pivot_wider(names_from = RP, values_from = cost, values_fn = Sum) %>%
mutate(total_cost = 0.95*(`001`+`020`)/2 + 0.04*(`020`+`100`)/2 + 0.01*(`100`))
One last thing to take care of: right now, we have vehicles assigned to all buildings within the census tracts that encompass San Rafael. After we’ve completed the assignment of vehicles to locations, we need to do one final subsetting and hold on to only the buildings (and vehicles) that fall within census-designated San Rafael boundary. This leaves us with 39,820 vehicles to estimate loss for.
With vehicle locations and vulnerability information defined, we can move on to predicting the average annualized loss (AAL). We assumed that the cost of replacement was $20,000 for a sedan and $35,000 for a pickup. We also assumed that all vehicles would stay in place and none would be moved before a flood event. We decided that only a mandatory evacuation would be enough to convince people to move their cars. Even in the most extreme flooding scenario, only about 13.86% of households are inundated, which would not be enough to warrant a mandatory evacuation.
The current overall AAL for vehicle flood losses in all of San Rafael was found to be $15,052,384. Let’s explore that number a bit further within the study area.
# tag each building with the CBG it falls in; st_intersection appends the
# CBG's GEOID as GEOID.1 (the original GEOID column is the tract's)
sr_building_cbg <- sr_buildings %>%
st_transform(crs_planar) %>%
st_intersection(sr_cbgs %>% st_transform(crs_planar)) %>%
st_transform(crs_leaflet) %>%
rename(CBG = GEOID.1)
## Warning: attribute variables are assumed to be spatially constant throughout all
## geometries
damage.id <- damage %>% left_join(sr_building_cbg %>% select(obs.id, GEOID, CBG), ., by = 'obs.id')
# per-CBG AAL, computed with the same scenario-averaging and return-period
# weighting as the citywide AAL, then joined back onto CBG geometry for maps
damage.plot <- damage.id %>%
st_drop_geometry %>%
group_by(CBG, RP, SLR) %>%
summarize(cost = sum(cost), .groups = 'drop_last') %>%
filter(SLR %in% c('000','025')) %>%
summarize(cost = mean(cost), .groups = 'drop') %>%
pivot_wider(names_from = RP, values_from = cost, values_fn = Sum) %>%
mutate(total_cost = 0.95*(`001`+`020`)/2 + 0.04*(`020`+`100`)/2 + 0.01*(`100`)) %>%
left_join(sr_cbgs %>% select(GEOID), ., by = c('GEOID' = 'CBG'))
# the two highest-loss CBGs (the "Canal St" CBGs) and their share of total loss
canalst <- damage.plot %>% arrange(desc(total_cost)) %>% .$GEOID %>% .[1:2]
canalst_pct <- (damage.plot %>% filter(GEOID %in% canalst) %>% .$total_cost %>% sum) /
sum(damage.plot$total_cost)
## AAL
# choropleth of per-CBG vehicle flood AAL in $M; zero-loss CBGs stay white
ggplot() +
geom_sf(data = sr_cbgs, fill = 'white') +
geom_sf(data = damage.plot %>% filter(total_cost > 0), aes(fill = total_cost/1e6)) +
geom_sf(data = sanrafael, fill = NA, color = 'black', size = 1) +
scale_fill_distiller(name = 'Loss ($M)', palette = 'Reds', direction = 1, label = comma) +
ggtitle('Vehicle Flood AAL by CBG') +
theme_void()
The spatial distribution of vehicle flood loss is surprising. First of all, only a small portion of the CBGs see any flood loss at all. If you remember the map of the worst-case flooding scenario, though, only a portion of the city was projected to see any inundation, so this matches our expectations.
Another very interesting thing to note is the two CBGs that are significant outliers in terms of the magnitude of the expected loss. These bright red CBGs represent 79.64% of all loss in the city, which is a huge contribution to the overall total. Let’s investigate a bit more to see if these values make sense.
## plot population
# B01001_001E = total population, pulled at block-group level and mapped
getCensus(name = 'acs/acs5', vars = 'B01001_001E', vintage = 2018,
regionin = 'state:06+county:041', region = 'block group:*') %>%
mutate(GEOID = paste0(state, county, tract, block_group)) %>%
select(GEOID, pop = B01001_001E) %>%
left_join(sr_cbgs, ., by = 'GEOID') %>%
ggplot() +
geom_sf(aes(fill = pop/1e3)) +
geom_sf(data = sanrafael, fill = NA, color = 'black', size = 1) +
scale_fill_distiller(name = '', palette = 'Blues', direction = 1,
label = comma_format(accuracy = 1, suffix = 'K')) +
ggtitle('Population by CBG') +
theme_void() + theme(legend.position = "bottom")
## plot households & vehicles
# g1: household counts by tract, from the units-in-structure breakdown
g1 <- sr_households %>%
group_by(GEOID) %>%
summarize(num_households = sum(estimate), .groups = 'drop') %>%
left_join(sr_tracts, ., by = 'GEOID') %>%
ggplot() +
geom_sf(aes(fill = num_households/1e3)) +
geom_sf(data = sanrafael, fill = NA, color = 'black', size = 1) +
scale_fill_distiller(name = '', palette = 'Blues', direction = 1,
label = comma_format(accuracy = 0.1, suffix = 'K')) +
ggtitle('Households by Tract') +
theme_void() + theme(legend.position = "bottom")
# g2: total vehicles by tract; the first character of the label gives the
# count ("No vehicle available" coerces to NA and is recoded to 0)
g2 <- sr_vehicles %>%
group_by(GEOID, vehicles) %>%
summarize(estimate = sum(estimate), .groups = 'drop_last') %>%
mutate(num_vehicles = suppressWarnings(toNumber(str_sub(vehicles, end = 1)) %>% ifelse(is.na(.), 0, .)),
total_vehicles = num_vehicles*estimate) %>%
summarize(total_vehicles = sum(total_vehicles), .groups = 'drop')%>%
left_join(sr_tracts, ., by = 'GEOID') %>%
ggplot() +
geom_sf(aes(fill = total_vehicles/1e3)) +
geom_sf(data = sanrafael, fill = NA, color = 'black', size = 1) +
scale_fill_distiller(name = '', palette = 'Blues', direction = 1,
label = comma_format(accuracy = 1, suffix = 'K')) +
ggtitle('Vehicles by Tract') +
theme_void() + theme(legend.position = "bottom")
# side-by-side comparison of the two choropleths
gridExtra::grid.arrange(g1, g2, ncol = 2)
Looking at the distributions of households, vehicles, and population makes the story of these anomalous CBGs even more confusing. While they appear as a location with a high concentration of residents on the population plot, they seem to have lower numbers of households and vehicles relative to the rest of the city. How can you have high numbers of people with low numbers of households?
# satellite view of the two highest-loss ("Canal St") CBGs
leaflet() %>%
addMapboxTiles(style_id = "satellite-streets-v11", username = "mapbox") %>%
addPolygons(data = damage.plot %>% filter(GEOID %in% canalst))
One question is answered immediately: these two CBGs lie parallel to a waterway, so that explains at least one aspect of the extremely high flood loss value. In addition, it’s a bit difficult to tell from the leaflet map, but if you pull up this area in Google Maps and look at the labels a lot of the buildings along this stretch of Canal St are large apartment complexes. As part of the allocation of buildings -> households -> vehicles, we assumed that the maximum number of household units within any individual building was 25. It seems likely that this is an underestimate along this ten-ish block stretch, which may help explain the disconnect between the high population value and the low number of households. We may even be underestimating the number of vehicles in these CBGs.
It’s very possible that the residents in these CBGs (we’ll call them the Canal St CBGs) have a lot of cars in their buildings, and more likely than not those cars are probably garaged in subgrade levels. The maps of inundation also show this particular area as being more prone to flooding than other parts of the city. However, if the vehicles in this area are better protected than we are assuming, then the overall loss for the entire study area changes dramatically. Without further information about the Canal St CBGs and the number/elevation of the vehicles within them, I think that there is a very high level of uncertainty about our total expected loss for San Rafael.
## get median income by CBG
# B19001: count of households in each income bracket, reshaped wide so each
# CBG is one row with a column per bracket label
med_income <-
getCensus(name = 'acs/acs5', vars = 'group(B19001)', vintage = 2018,
regionin = 'state:06+county:041', region = 'block group:*') %>%
mutate(GEOID = paste0(state, county, tract, block_group)) %>%
select(-state, -county, -tract, -block_group, -GEO_ID, -NAME) %>%
select(-ends_with('M'), -ends_with('A')) %>%
pivot_longer(cols = ends_with('E'), names_to = 'variable', values_to = 'estimate') %>%
left_join(acs_vars_2018_5yr %>% select('name', 'label'), by = c('variable' = 'name')) %>%
separate(label, c(NA, NA, 'income_bracket'), sep = '!!', fill = 'right') %>%
filter(!is.na(income_bracket)) %>%
pivot_wider(id_cols = GEOID, names_from = income_bracket, values_from = estimate)
# upper bounds ($) of the 16 B19001 brackets; $1M caps the open-ended top one
income_bracket <- 1e3*c(seq(10, 50, 5), 60, 75, 100, 125, 150, 200, 1000)
# estimate each CBG's median income by linear interpolation: build the
# cumulative household share across brackets, find the bracket where it
# crosses 0.5, and interpolate within that bracket
temp <- med_income[,-1] %>%
apply(1, function(x) cumsum(x)/sum(x)) %>% t %>%
apply(1, function(x) c(x[last(which(x < 0.5))], last(which(x < 0.5)),
x[first(which(x > 0.5))])) %>% t %>%
as.data.frame %>%
setNames(c('prop.start', 'id.start', 'prop.end')) %>%
cbind(GEOID = med_income$GEOID, .) %>%
mutate(id.end = id.start + 1) %>%
left_join(data.frame(income.start = income_bracket, id = 1:16), by = c('id.start' = 'id')) %>%
left_join(data.frame(income.end = income_bracket, id = 1:16), by = c('id.end' = 'id')) %>%
mutate(med_income = round(income.start + (0.5-prop.start)*
(income.end-income.start)/(prop.end-prop.start))) %>%
select(GEOID, med_income) %>%
filter(!is.na(med_income)) %>%
left_join(sr_cbgs, ., by = 'GEOID')
## scatterplot of flood damage vs. median income
# dashed reference lines: 2018 median household income for California
# ($71,228) and Marin County ($126,373); the log y-axis drops zero-loss CBGs
# to -Inf, which is what triggers the warning below
damage.plot %>%
st_drop_geometry %>%
left_join(temp, ., by = 'GEOID') %>%
st_drop_geometry %>%
ggplot() +
geom_point(aes(x = med_income/1e3, y = round(total_cost/1e3))) +
geom_vline(aes(xintercept = 71228/1e3, color = 'California'), linetype = 'dashed') +
geom_vline(aes(xintercept = 126373/1e3, color = 'Marin County'), linetype = 'dashed') +
scale_color_manual(name = 'Median Income', values = c('royalblue1', 'darkorange1')) +
scale_x_continuous(label = comma_format(accuracy = 1, prefix = '$', suffix = 'K'),
limits = c(0, NA), expand = expansion(c(0, 0.07))) +
scale_y_log10(label = comma_format(prefix = '$', accuracy = 1, suffix = 'K')) +
annotation_logticks(sides = 'l') +
ggtitle('Median Income vs. Expected Loss') +
labs(x = 'Median Income by CBG', y = 'Expected Vehicle Flood Loss by CBG') +
theme_classic()
## Warning: Transformation introduced infinite values in continuous y-axis
Another lens to look at this data is to look at whether the loss is affecting portions of the city that are already socioeconomically disadvantaged. In the plot above, we’re looking at the expected vehicle flood losses by CBG mapped against the median incomes of each CBG. The 2018 median incomes in Marin County (orange) and California overall (blue) are included for reference. The half-hidden points at the very bottom of the plot are CBGs within San Rafael where no damage was recorded, included for reference. It does seem like the areas that are experiencing the worst flooding are already a bit worse off than the rest of the city, and definitely the rest of the county.
## do a breakout by vehicle ownership level
## for the no-sea-level-rise scenarios, count vehicles per household and find
## the share of damaged households at each ownership level
damage.vehicles <- damage.id %>%
st_drop_geometry %>%
## restrict to the current-day (no SLR) flood scenarios
filter(SLR == '000') %>%
group_by(house.id, RP) %>%
## vehicles = rows (i.e. vehicles) per household in each return-period scenario;
## vehicle.id gives each vehicle a random within-household index
mutate(vehicles = length(SLR),
vehicle.id = sample(1:length(SLR), size = length(SLR), replace = FALSE)) %>%
ungroup %>%
select(house.id, vehicle.id, vehicles, cost) %>%
group_by(house.id, vehicle.id, vehicles) %>%
summarize(cost = sum(cost), .groups = 'drop') %>%
## keep only vehicles that took any damage
filter(cost > 0) %>%
group_by(vehicles) %>%
summarize(households = length(unique(house.id)), .groups = 'drop') %>%
## keep the first four ownership levels -- assumes rows 5+ are residual
## categories not plotted below; TODO confirm against the grouped output
.[1:4,] %>%
mutate(pct_damaged = households/sum(households))
## compare the ownership-level distribution of flooded households vs. all households
sr_vehicles_pct %>%
## parse the leading digit of the ACS ownership category; non-numeric
## categories (e.g. "No vehicle...") coerce to NA -- the warnings below --
## and are recoded as 0 vehicles
mutate(vehicles = toNumber(str_sub(vehicles, end = 1)) %>% ifelse(is.na(.), 0, .)) %>%
left_join(damage.vehicles, by = 'vehicles') %>%
select(vehicles, pct_households = percentage, pct_damaged) %>%
## long format: one row per (vehicles, series) for dodged bars
pivot_longer(cols = -vehicles, names_to = 'type', values_to = 'pct') %>%
ggplot() +
geom_col(aes(x = factor(vehicles), y = pct, group = type, fill = type), position = 'dodge') +
## label order follows the alphabetical factor levels: pct_damaged, pct_households
scale_fill_manual(values = c('grey25', 'grey75'), labels = c('Flooded \nHouseholds', 'All Households')) +
scale_y_continuous(labels = percent, expand = c(0,0)) +
ggtitle('Loss Results by Vehicle Allocation') +
labs(x = 'Number of Vehicles Allocated to Household', y = 'Percentage of Households', fill = '') +
theme_classic() + theme(legend.position = c(0.8, 0.8))
## Warning: Problem with `mutate()` input `vehicles`.
## i NAs introduced by coercion
## i Input `vehicles` is ``%>%`(...)`.
## Warning in toNumber(str_sub(vehicles, end = 1)): NAs introduced by coercion
## Warning: Removed 1 rows containing missing values (geom_col).
One more way to look at our results is to consider how many single-vehicle households were affected. Because of the way that we distributed our vehicles geospatially, and because we chose to remove none of the vehicles before the flood event, there isn’t a ton of variation. For example, for a multiple-car household, the chance of having one vehicle flooded is essentially the same as having all your vehicles flooded, which is probably not true in real life. However, we can see if the proportion of single-vehicle households was larger in the areas that were flooded vs. the areas that were not.
In the bar chart above, the proportion of flooded households by number of vehicles allocated is slightly larger than the proportion of all households in every case. This is because households with zero vehicles are not represented in the flood loss results. However, it looks like there are slightly more households with 1 or 2 vehicles within the flooded area. This is an interesting finding, and it is worth exploring on a more granular level to see which households within San Rafael may be disproportionately affected by a flood event.
Finally, we need to repeat the AAL process for future decades. In order to do this we must update both our hazard (to account for climate change) and our exposure (to account for the growth in the number of vehicles over the next thirty years). To update our hazard, we can use climate data from the Representative Concentration Pathway (RCP) 8.5 model, which is a moderate- to high-emissions scenario. This data gives us the probability of exceeding a given level of sea level rise in a certain amount of time. This lets us combine our different sea level rise scenarios into one projected “future AAL” at every decade from now until 2050.
## find the likelihood of SLR exceeding some threshold using RCP 8.5
## (rows: SLR thresholds in cm; columns: exceedance probability per decade)
rcp85 <- read.csv('https://raw.githubusercontent.com/stanfordfuturebay/stanfordfuturebay.github.io/master/course/rcp85_sanfrancisco.csv')
colnames(rcp85) <- c('SLR (cm)', as.character(seq(2020, 2100, 10)))
#### add new vehicles in future decades ####
## split the polygon into little pieces
## lay a regular grid over the city; the offset snaps cell edges to rounded
## coordinates (so the grid origin is stable), shifted by half a cell
grid.size <- 0.0025
pieces <- sanrafael %>%
st_make_grid(offset = round(st_bbox(sanrafael)[c('xmin','ymin')]/2, 3)*2 - grid.size/2,
cellsize = grid.size, what = 'polygons') %>% st_sf %>%
mutate(pieces.id = 1:nrow(.)) %>%
## sf "[" spatial filter: keep only cells intersecting San Rafael
.[sanrafael,]
## although coordinates are longitude/latitude, st_intersects assumes that they are planar
## find the existing distribution of vehicles within the polygon
## count vehicles per building, attach counts to building footprints, then
## aggregate building centroids up to the grid cells (vehicles per cell)
vehicle_dist <- sr_vehicles_new %>%
group_by(obs.id) %>%
summarize(num_vehicles = length(vehicles), .groups = 'drop') %>%
left_join(sr_buildings, ., by = 'obs.id') %>%
## buildings with no allocated vehicles get an explicit zero
mutate(num_vehicles = ifelse(is.na(num_vehicles), 0, num_vehicles)) %>%
st_transform(crs_planar) %>%
st_centroid %>%
## assign each building centroid to the grid cell it falls in
st_intersection(pieces %>% st_transform(crs_planar)) %>%
st_drop_geometry %>%
group_by(pieces.id) %>%
summarize(num_vehicles = sum(num_vehicles), .groups = 'drop')
## Warning in st_centroid.sf(.): st_centroid assumes attributes are constant over
## geometries of x
## Warning: attribute variables are assumed to be spatially constant throughout all
## geometries
## master table of vehicle point locations; starts with the existing (2018)
## fleet, with future decades appended by the loop further down
locs <- sr_vehicles_new %>%
transmute(obs.id, type, year = 2018) %>%
right_join(sr_buildings %>% select(obs.id), ., by = 'obs.id') %>%
st_transform(crs_planar) %>%
## keep only vehicles at buildings inside the San Rafael boundary
.[sanrafael %>% st_transform(crs_planar),] %>%
st_centroid %>%
st_transform(crs_leaflet)
## Warning in st_centroid.sf(.): st_centroid assumes attributes are constant over
## geometries of x
## Split an integer total `n` into parts proportional to the weights in `x`.
## Rounding is applied to the cumulative allocation rather than to each share,
## so the parts always sum exactly to round(n) with no drift.
nsplit <- function(x, n) {
  shares <- cumsum(x) / sum(x)        # cumulative proportion of the total
  breaks <- round(n * c(0, shares))   # rounded cumulative allocation
  diff(breaks)                        # per-element counts
}
## iteratively generate vehicle locations for each future decade (2020-2050)
for (yr in 1:4) {
  ## find the distribution of new vehicles within the polygon
  ## scale_factor = fractional fleet growth between this decade and the last
  scale_factor <- diff(marin_vehicles$scale_factor)[yr]
  ## allocate the decade's new vehicles across grid cells proportionally to the
  ## existing vehicle density (nsplit keeps the total exact)
  vehicle_dist_new <- vehicle_dist %>%
    mutate(new_vehicles = nsplit(num_vehicles, total_vehicles*scale_factor)) %>%
    filter(new_vehicles > 0)
  ## assign latlongs to newly generated vehicles
  cl <- makeCluster(num_cores)
  ## bug fix: the cluster was never registered, so %dopar% either warned and
  ## ran sequentially or reused a stale backend (cf. the registered pattern
  ## used for sr_depth_future below)
  registerDoParallel(cl)
  new_locs <-
    foreach (i = 1:nrow(vehicle_dist_new), .combine = 'rbind',
             .packages = c('dplyr', 'sf')) %dopar% {
      pieces %>%
        subset(pieces.id == vehicle_dist_new$pieces.id[i]) %>%
        st_transform(crs_planar) %>%
        ## drop the new vehicles uniformly at random within the grid cell
        st_sample(size = vehicle_dist_new$new_vehicles[i], type = 'random') %>%
        st_transform(crs_leaflet) %>%
        st_as_sf %>%
        mutate(obs.id = NA,
               type = sample(c('sedan', 'pickup'), size = nrow(.), replace = TRUE,
                             prob = c(pct_sedans, 1-pct_sedans)),
               year = seq(2020, 2050, 10)[yr]) %>%
        select(obs.id, type, year, geometry = x)
    }
  stopCluster(cl)
  ## combine with existing information
  locs <- rbind(locs, new_locs)
}
## tabulate vehicle counts by vintage year, reported as cumulative totals
as.data.frame(table(locs$year)) %>%
  setNames(c('Year', 'Vehicles')) %>%
  mutate(Vehicles = comma(cumsum(Vehicles)))
## Year Vehicles
## 1 2018 39,820
## 2 2020 40,992
## 3 2030 47,182
## 4 2040 51,555
## 5 2050 54,262
To update the exposure, we need to calculate the number of new vehicles each decade and assign them a location within San Rafael. As previously mentioned, we are assuming that the spatial/demographic distribution of vehicle ownership stays exactly the same in Marin County and in San Rafael, which makes it easy to scale linearly. For each decade starting with 2020 (because our ACS data is from 2018), we will hold on to all of the existing vehicle locations, but we will randomly generate new vehicle locations based on the spatial density of existing vehicles to make up the difference. That process is implemented iteratively and results in the totals shown in the table above.
## get flood depths at all new vehicles
## one parallel task per (SLR scenario x return period); each task loads the
## matching flood raster and samples depths at the future vehicle points
cl <- makeCluster(num_cores)
registerDoParallel(cl)
sr_depth_future <-
foreach (slr = c("000", "025", "050", "075", "100"),
.combine = 'rbind', .inorder = FALSE,
.packages = c('raster', 'sf', 'dplyr')) %:%
foreach (rp = c("001", "020", "100"),
.combine = 'rbind', .inorder = FALSE,
.packages = c('raster', 'sf', 'dplyr')) %dopar% {
flood <- raster(paste0("./SLR", slr, "_RP", rp, "_epa_flood.tif"))
locs %>%
## only the vehicles added after the 2018 baseline
filter(year > 2018) %>%
select(year, type) %>%
## NA extract = point outside the flood extent, i.e. dry (depth 0)
mutate(depth = raster::extract(x = flood, y = .) %>% ifelse(is.na(.), 0, .),
SLR = slr, RP = rp)
}
stopCluster(cl)
## find the total cost of vehicles flooded in each scenario
## interpolate the depth-damage curves (curve depths in ft converted to cm via
## x12 x2.54, pinned to 100% damage at 500 cm) and price each vehicle's loss
damage_future <- sr_depth_future %>%
  mutate(
    sedan_damage = approx(x = c(depthdamage$DepthAboveGround*12*2.54, 500),
                          y = c(depthdamage$Sedans_PctDamage, 1),
                          xout = depth)$y,
    pickup_damage = approx(x = c(depthdamage$DepthAboveGround*12*2.54, 500),
                           y = c(depthdamage$Pickups_PctDamage, 1),
                           xout = depth)$y,
    ## assumed replacement values: $20,000 per sedan, $35,000 per pickup
    sedan_cost = 20000 * sedan_damage,
    pickup_cost = 35000 * pickup_damage,
    ## pick the cost matching each vehicle's type
    cost = case_when(type == 'sedan' ~ sedan_cost,
                     type == 'pickup' ~ pickup_cost)) %>%
  select(SLR, RP, year, cost)
## combine damage estimates from current vehicles + future vehicles
## aggregate per-vehicle future losses up to the CBG level for each scenario
damage_future.id <- damage_future %>%
st_transform(crs_planar) %>%
## tag each vehicle point with its CBG (overlay done in the planar CRS)
st_intersection(sr_cbgs %>% select(CBG = GEOID) %>% st_transform(crs_planar)) %>%
st_transform(crs_leaflet) %>%
group_by(CBG, SLR, RP, year) %>%
summarize(cost = sum(cost), .groups = 'drop')
## Warning: attribute variables are assumed to be spatially constant throughout all
## geometries
## stack the baseline (2018) CBG losses on top of the future-decade losses
damage_all.id <- damage.id %>%
filter(complete.cases(st_drop_geometry(.))) %>%
st_transform(crs_planar) %>%
## collapse the 2018 geometries to centroids before binding to the future rows
st_centroid %>%
st_transform(crs_leaflet) %>%
mutate(year = 2018) %>%
select(CBG, SLR, RP, year, cost) %>%
rbind(damage_future.id)
## Warning in st_centroid.sf(.): st_centroid assumes attributes are constant over
## geometries of x
## collapse return periods into an annualized cost per CBG x SLR x year
damage_all.plot <- damage_all.id %>%
st_drop_geometry %>%
select(CBG, SLR, RP, year, cost) %>%
pivot_wider(names_from = RP, values_from = cost, values_fn = Sum) %>%
## trapezoid rule over annual exceedance probabilities: RP 1/20/100-yr events
## have p = 1/0.05/0.01, giving interval weights 0.95, 0.04, and 0.01
mutate(cost = 0.95*(`001`+`020`)/2 + 0.04*(`020`+`100`)/2 + 0.01*(`100`)) %>%
select(CBG, SLR, year, cost)
Once we find the flood depths at the new vehicle locations, we can move on to predicting adjusted future AALs by decade. We are not updating our vulnerability information, and we are not updating our risk estimation process, so all results are reported in 2018 dollars. That means that all of the increases in loss shown in the next few plots are real increases, not just the result of inflation.
## predict AAL in future decades
## attach the decade-specific SLR exceedance probabilities to each cost row
cost_all <- damage_all.plot %>%
left_join(rcp85 %>%
## zero-pad SLR (cm) to match the 3-character scenario codes ('000'...'100')
mutate(SLR = str_pad(`SLR (cm)`, 3 , "left", "0")) %>%
select(SLR, `2020`, `2030`, `2040`, `2050`), by = 'SLR')
## for each analysis year, weight scenario costs by the probability of the
## corresponding SLR level occurring by that year (2018 borrows the 2020 column)
AAL_all <-
foreach(yr = c(2018, seq(2020, 2050, 10)), .combine = 'rbind') %do% {
cost_all %>%
subset(year == yr) %>%
pivot_longer(`2020`:`2050`, names_to = "scenario", values_to = "occurrence") %>%
pivot_longer(c(cost, occurrence), names_to = "key", values_to = "value") %>%
## spread to one column per (cost|occurrence) x SLR combination
pivot_wider(names_from = c("key", "SLR"), values_from = value) %>%
subset(scenario == ifelse(yr == 2018, 2020, yr)) %>%
## trapezoid rule again, here over SLR exceedance probabilities
mutate(total_cost = occurrence_000 * (cost_000 + cost_025)/2 +
occurrence_025 * (cost_025 + cost_050)/2 +
occurrence_050 * (cost_050 + cost_075)/2 +
occurrence_075 * (cost_075 + cost_100)/2 +
occurrence_100 * (cost_100)) %>%
select(CBG, year, total_cost)
}
## citywide AAL per analysis year
AAL_table <- AAL_all %>%
group_by(year) %>%
summarize(total_cost = sum(total_cost), .groups = 'drop')
## line chart of cumulative projected AAL (in $M), labeled with exact values
ggplot(AAL_table) +
geom_line(aes(x = toNumber(year), y = cumsum(total_cost)/1e6)) +
geom_point(aes(x = toNumber(year), y = cumsum(total_cost)/1e6)) +
## nudge the dollar labels right of and below their points
geom_text(aes(x = toNumber(year)+3, y = cumsum(total_cost)/1e6-0.1,
label = comma(cumsum(total_cost), prefix = '$'))) +
ggtitle('Projected Future AAL for Vehicles Exposed to Flood',
subtitle = 'City of San Rafael, Marin County') +
scale_x_continuous(expand = c(0.1, 0.1)) +
scale_y_continuous(name = 'Estimated AAL',
labels = comma_format(accuracy = 1, prefix = '$', suffix = 'M')) +
theme_classic() + theme(axis.title.x = element_blank())
## plot future loss
## reshape to one column per year; the sequential mutates below intentionally
## accumulate each decade onto the previous total (each expression reuses the
## already-updated prior column), yielding cumulative AAL by decade
cost_leaflet <- AAL_all %>%
pivot_wider(id_cols = 'CBG', names_from = 'year', values_from = 'total_cost') %>%
mutate(`2020` = `2020`+`2018`,
`2030` = `2030`+`2020`,
`2040` = `2040`+`2030`,
`2050` = `2050`+`2040`) %>%
select(-`2018`) %>%
## drop CBGs with no loss in any decade
filter(`2020` + `2030` + `2040` + `2050` > 0) %>%
right_join(sr_cbgs %>% select(CBG = GEOID), ., by = 'CBG')
## shared color domain across all four decade layers
cost_max <- cost_leaflet %>% st_drop_geometry %>% select(-CBG) %>% apply(2, max) %>% max
aal_pal <- colorNumeric(palette = "Reds", domain = c(0, cost_max))
## share of 2050 losses attributable to the Canal St CBGs (quoted in the text)
canalst_pct <- (cost_leaflet %>% filter(CBG %in% canalst) %>% .$`2050`) / sum(cost_leaflet$`2050`)
## interactive map of cumulative AAL per CBG, one toggleable layer per decade
cost_leaflet %>%
leaflet() %>%
addMapboxTiles(style_id = "light-v9", username = "mapbox") %>%
## background layer: every CBG in white (no recorded flooding)
addPolygons(data = sr_cbgs, fillColor = 'white', color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
label = ~paste('No flooding recorded')) %>%
addPolygons(fillColor = ~aal_pal(`2020`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0("$",prettyNum(signif(`2020`,2),",")," average annualized loss in 2020"),
group = "2020") %>%
addPolygons(fillColor = ~aal_pal(`2030`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
## bug fix: label previously said "in 2020" on the 2030 layer
label = ~paste0("$",prettyNum(signif(`2030`,2),",")," average annualized loss in 2030"),
group = "2030") %>%
addPolygons(fillColor = ~aal_pal(`2040`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
## bug fix: label previously said "in 2020" on the 2040 layer
label = ~paste0("$",prettyNum(signif(`2040`,2),",")," average annualized loss in 2040"),
group = "2040") %>%
addPolygons(fillColor = ~aal_pal(`2050`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0("$",prettyNum(signif(`2050`,2),",")," average annualized loss in 2050"),
group = "2050") %>%
addLegend(pal = aal_pal, values = ~`2050`, title = "AAL") %>%
addLayersControl(baseGroups = c("2020", "2030", "2040", "2050"),
options = layersControlOptions(collapsed = FALSE)) %>%
showGroup("2050")
This plot shows us how the magnitude of loss changes by CBG over the next few decades. The Canal St area continues to emerge as a hotspot of flood-related loss, but some other CBGs are beginning to catch up; by 2050, the two Canal St CBGs only represent 46.19% and 26.72% of total losses, respectively. The southernmost CBG that encircles an inlet starts to see much more damage in the future as well.
## plot changes in loss
## relative growth in AAL per CBG: each decade's cumulative AAL divided by the
## CBG's smallest recorded AAL across years
increase_leaflet <- AAL_all %>%
filter(total_cost > 0) %>%
## keep only CBGs that ever record a loss
select(CBG) %>% unique %>%
left_join(AAL_all, by = 'CBG') %>%
pivot_wider(id_cols = 'CBG', names_from = 'year', values_from = 'total_cost') %>%
## sequential mutates accumulate each decade onto the previous total
mutate(`2020` = `2020`+`2018`,
`2030` = `2030`+`2020`,
`2040` = `2040`+`2030`,
`2050` = `2050`+`2040`) %>%
pivot_longer(cols = `2018`:`2050`, names_to = 'year', values_to = 'total_cost') %>%
group_by(CBG) %>%
mutate(cost_increase = total_cost/min(total_cost)) %>%
## 0/0 (no loss in any year for this CBG) -> ratio of 0
mutate(cost_increase = ifelse(is.nan(cost_increase), 0, cost_increase)) %>%
## x/0 (newly flooded CBG) -> sentinel value 5, drawn as special layers below
mutate(cost_increase = ifelse(is.infinite(cost_increase), 5, cost_increase)) %>%
pivot_wider(id_cols = 'CBG', names_from = 'year', values_from = 'cost_increase') %>%
right_join(sr_cbgs %>% select(CBG = GEOID), ., by = 'CBG')
## shared color domain; note this reassigns aal_pal from the previous map
increase_max <- increase_leaflet %>% st_drop_geometry %>% select(-CBG) %>% apply(2, max) %>% max
aal_pal <- colorNumeric(palette = "Greens", domain = c(0, increase_max))
## map of AAL growth multipliers; CBGs flooded for the first time in a given
## decade (sentinel value 5) are drawn as their own labeled layers
increase_leaflet %>%
## main layers: only CBGs with a finite multiplier in every decade
filter(!(`2020` == 5 | `2030` == 5 | `2040` == 5 | `2050` == 5)) %>%
leaflet() %>%
addMapboxTiles(style_id = "light-v9", username = "mapbox") %>%
## background layer: all CBGs in white (no recorded flooding)
addPolygons(data = sr_cbgs, fillColor = 'white', color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
label = ~paste('No flooding recorded')) %>%
addPolygons(fillColor = ~aal_pal(`2020`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0(round(`2020`,2), 'x the current AAL'),
group = "2020") %>%
addPolygons(fillColor = ~aal_pal(`2030`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0(round(`2030`,2), 'x the current AAL'),
group = "2030") %>%
addPolygons(fillColor = ~aal_pal(`2040`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0(round(`2040`,2), 'x the current AAL'),
group = "2040") %>%
addPolygons(fillColor = ~aal_pal(`2050`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0(round(`2050`,2), 'x the current AAL'),
group = "2050") %>%
## first-flooded-in-2030 CBGs, shown from 2030 onward
## NOTE(review): leaflet's `group` argument expects a single group name;
## passing a vector here may not attach the layer to all listed decades --
## confirm, or add the layer once per group
addPolygons(data = increase_leaflet %>% filter(`2030` == 5),
fillColor = ~aal_pal(`2050`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0('Flooded for the first time in 2030'),
group = c("2030", "2040", "2050")) %>%
## first-flooded-in-2040 CBGs (same NOTE about the vector `group` applies)
addPolygons(data = increase_leaflet %>% filter(`2040` == 5 & `2030` != 5),
fillColor = ~aal_pal(`2050`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0('Flooded for the first time in 2040'),
group = c("2040", "2050")) %>%
## first-flooded-in-2050 CBGs
addPolygons(data = increase_leaflet %>% filter(`2050` == 5 & `2040` != 5),
fillColor = ~aal_pal(`2050`), color = "gray",
fillOpacity = 1, opacity = 1, weight = 0.25,
highlightOptions = highlightOptions(color = "white", weight = 2),
label = ~paste0('Flooded for the first time in 2050'),
group = "2050") %>%
addLegend(pal = aal_pal, values = ~`2050`, title = "AAL Change") %>%
addLayersControl(baseGroups = c("2020", "2030", "2040", "2050"),
options = layersControlOptions(collapsed = FALSE)) %>%
showGroup("2050")
Finally, this plot shows us where losses are expected to change the most. By 2020 we do not expect significant changes to where flood-related vehicle losses will occur from our original (2018) analysis. By 2030, we add an additional CBG along the Bay that is not currently projected to see any damage. By 2040, there are some larger changes: losses stay the same in some places, but in others they more than double. By 2050, we are seeing losses that are up to 5x what we expect today. This map in particular highlights the importance of considering sea level rise in loss projections for San Rafael.
To summarize, we performed an analysis to estimate the flood AAL for vehicles in the city of San Rafael. We found that while only portions of the city are exposed to flooding, those portions are projected to see millions of dollars of flood damage to their vehicles, especially in concentrated downtown areas near the bay and the canal. We looked at the spatial distribution of those losses, as well as considered some of the socioeconomic factors that may exacerbate the disparate impacts of flooding on different populations. Finally, we showed how those losses are expected to increase when we take climate change and sea level rise into account.
We gained some insights from all of these processes, but it is important to note the several assumptions and the deep uncertainties inherent in this analysis. More granular information and on-the-ground local knowledge is needed to refine and validate the accuracy of these estimates.